In [1]:
import os
import pandas as pd
import preprocess
import cv2
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input, decode_predictions

pd.set_option("display.max_rows",999)
np.random.seed(42)
Using TensorFlow backend.
In [2]:
DATA_DIR_PATH = preprocess.DATA_DIR_PATH
NUM_CLASSES = 120
In [3]:
labels = pd.read_csv(os.path.join(preprocess.DATA_DIR, 'labels.csv'))

def get_img_cls_from_id(img_id):
    return labels[labels.id == img_id]['breed'].values[0]

Breed by Frequency, Note classes are imbalanced

In [4]:
labels.groupby('breed').count().sort_values(by='id', ascending=False)
Out[4]:
id
breed
scottish_deerhound 126
maltese_dog 117
afghan_hound 116
entlebucher 115
bernese_mountain_dog 114
shih-tzu 112
great_pyrenees 111
pomeranian 111
basenji 110
samoyed 109
airedale 107
tibetan_terrier 107
leonberg 106
cairn 106
beagle 105
japanese_spaniel 105
australian_terrier 102
blenheim_spaniel 102
miniature_pinscher 102
irish_wolfhound 101
lakeland_terrier 99
saluki 99
papillon 96
whippet 95
siberian_husky 95
norwegian_elkhound 95
pug 94
chow 93
italian_greyhound 92
pembroke 92
ibizan_hound 91
border_terrier 91
newfoundland 91
lhasa 90
silky_terrier 90
bedlington_terrier 89
dandie_dinmont 89
irish_setter 88
sealyham_terrier 88
rhodesian_ridgeback 88
old_english_sheepdog 87
collie 87
boston_bull 87
english_foxhound 86
bouvier_des_flandres 86
african_hunting_dog 86
schipperke 86
kelpie 86
weimaraner 85
bloodhound 85
bluetick 85
saint_bernard 84
labrador_retriever 84
chesapeake_bay_retriever 83
norfolk_terrier 83
english_setter 83
wire-haired_fox_terrier 82
kerry_blue_terrier 82
scotch_terrier 82
yorkshire_terrier 82
groenendael 82
greater_swiss_mountain_dog 82
irish_terrier 82
basset 82
keeshond 81
west_highland_white_terrier 81
gordon_setter 81
malamute 81
affenpinscher 80
toy_poodle 80
clumber 80
mexican_hairless 80
dingo 80
standard_poodle 79
miniature_poodle 79
staffordshire_bullterrier 79
welsh_springer_spaniel 79
toy_terrier 79
sussex_spaniel 78
norwich_terrier 78
appenzeller 78
irish_water_spaniel 78
miniature_schnauzer 78
black-and-tan_coonhound 77
cardigan 76
dhole 76
shetland_sheepdog 76
rottweiler 76
english_springer 75
great_dane 75
german_short-haired_pointer 75
boxer 75
bull_mastiff 75
borzoi 75
pekinese 75
cocker_spaniel 74
american_staffordshire_terrier 74
doberman 74
brittany_spaniel 73
malinois 73
standard_schnauzer 72
flat-coated_retriever 72
redbone 72
border_collie 72
curly-coated_retriever 72
kuvasz 71
chihuahua 71
soft-coated_wheaten_terrier 71
french_bulldog 70
vizsla 70
tibetan_mastiff 69
german_shepherd 69
giant_schnauzer 69
walker_hound 69
otterhound 69
golden_retriever 67
brabancon_griffon 67
komondor 67
briard 66
eskimo_dog 66
In [5]:
def read_image_as_rbg(img_id = None, shape = None):
    #img_id = '0a0c223352985ec154fd604d7ddceabd'
    img_BGR = cv2.imread(os.path.join(DATA_DIR_PATH, 'train', f'{img_id}.jpg'))
    if shape is not None:
        img_BGR = cv2.resize(img_BGR, shape)
    return cv2.cvtColor(img_BGR, cv2.COLOR_BGR2RGB)
In [6]:
#labels[labels.breed == 'scottish_deerhound'].id.tolist()[:5]
In [7]:
#4 Breed I chose fo fun
breeds = ['scottish_deerhound', 'maltese_dog', 'pomeranian', 'cocker_spaniel']
In [8]:
NUM_SAMPLE_PER_BREED = 6

fig = plt.figure(1, figsize=(32, 32))
grid = ImageGrid(fig, 111, nrows_ncols=(len(breeds), NUM_SAMPLE_PER_BREED), axes_pad=0.05)

idx = 0
for breed in breeds:
    for img_id in labels[labels.breed == breed].id.tolist()[:6]:
        ax = grid[idx]
        img = read_image_as_rbg(img_id=img_id)
        ax.imshow(img)
        ax.axis('off')
        idx += 1
plt.show()

there already exists a model for classifying Maltese Dogs and Scottish Deerhounds

We can use the weights of any model that's been trained on imagenet keras comes with some pre-trained "classical" models that are trained on existing datasets VGG16 is a "16-layer network used by the VGG team in the ILSVRC-2014 competition"

TODO: show image of the model

In [9]:
model_vgg16 = VGG16(weights='imagenet')
In [10]:
img_id = '0042188c895a2f14ef64a918ed9c7b64'
In [11]:
img = read_image_as_rbg(img_id=img_id, shape = (224, 224))
print(img.shape, img.max(), img.min())
(224, 224, 3) 254 0
In [12]:
x = preprocess_input(np.expand_dims(img.copy() + 0.0, axis = 0)) #return (1, 224, 224, 3)
preds = model_vgg16.predict(x)
In [13]:
res = decode_predictions(preds, top=2)
res
Out[13]:
[[('n02092002', 'Scottish_deerhound', 0.78298795),
  ('n02090721', 'Irish_wolfhound', 0.19540323)]]
In [14]:
def plot_image_and_labels(img_arr, true_cls, pred1, pred2):
    _, cls1, prob1 = pred1
    _,cls2, prob2 = pred2
#     cls, prob = 'Scottish_deerhound', 0.78298694
#     breed = 'scottish'
    plt.imshow(img)
    plt.text(10, 200, 'Predicted: %s (%.3f)' % (cls1 , prob1), color='w', backgroundcolor='k', alpha=0.8)
    plt.text(10, 220, 'Predicted: %s (%.3f)' % (cls2 , prob2), color='w', backgroundcolor='k', alpha=0.8)
    plt.text(10, 15, 'LABEL: %s' % breed, color='k', backgroundcolor='w', alpha=0.8)
    plt.axis('off')
    plt.show()
In [15]:
plot_image_and_labels(x, get_img_cls_from_id(img_id), pred1 = res[0][0], pred2=res[0][1])
In [16]:
NUM_SAMPLE_PER_BREED = 6

fig = plt.figure(1, figsize=(32, 32))
grid = ImageGrid(fig, 111, nrows_ncols=(len(breeds), NUM_SAMPLE_PER_BREED), axes_pad=0.05)

idx = 0
for breed in breeds:
    print('starting', breed)
    for img_id in labels[labels.breed == breed].id.tolist()[:6]:
        ax = grid[idx]
        img = read_image_as_rbg(img_id=img_id, shape = (224, 224))
        x = preprocess_input(np.expand_dims(img.copy().astype(float), axis = 0)) #return (1, 224, 224, 3)
        preds = model_vgg16.predict(x)
        _, cls, prob = decode_predictions(preds, top=1)[0][0]
        ax.imshow(img)
        ax.text(10, 200, 'Predicted: %s (%.3f)' % (cls , prob), color='w', backgroundcolor='k', alpha=0.8)
        ax.text(10, 15, 'LABEL: %s' % breed, color='k', backgroundcolor='w', alpha=0.8)
        ax.axis('off')
        idx += 1
plt.show()
starting scottish_deerhound
starting maltese_dog
starting pomeranian
starting cocker_spaniel

Create Training and Test Dataset

In [17]:
tmp = np.vstack( (np.array([1,1,1]), np.array([0,0,0])) )
tmp.sum(axis = 0)
Out[17]:
array([1, 1, 1])
In [18]:
x_train_list_id = []
y_train_labels = None
BREED_TO_IDX_MAP = { breed:idx for idx, breed in enumerate(breeds)}
IDX_TO_BREED_MAP = { idx:breed for idx, breed in enumerate(breeds)}

for idx, breed in enumerate(breeds):
    BREED_TO_IDX_MAP[breed] = idx
    breed_list = labels[labels.breed == breed].id.tolist() 
    num_ids = len(breed_list)
    
    breed_y = np.zeros((len(breed_list), len(breeds)))
    breed_y[:, idx] = 1 #mark response for this breed at this index
    
    
    x_train_list_id.extend( breed_list )
    
    print(len(breed_list), breed_y.shape)

    if y_train_labels is None:
        y_train_labels = breed_y
    else:
        y_train_labels = np.vstack( (y_train_labels, breed_y) )

        
print(BREED_TO_IDX_MAP)
print(IDX_TO_BREED_MAP)
print(len(x_train_list_id), y_train_labels.shape, y_train_labels.sum(axis = 0))
    
126 (126, 4)
117 (117, 4)
111 (111, 4)
74 (74, 4)
{'scottish_deerhound': 0, 'maltese_dog': 1, 'pomeranian': 2, 'cocker_spaniel': 3}
{0: 'scottish_deerhound', 1: 'maltese_dog', 2: 'pomeranian', 3: 'cocker_spaniel'}
428 (428, 4) [ 126.  117.  111.   74.]
In [19]:
def decode_arr(input_arr):
    """
    input_arr: vector of size 4
    """
    assert input_arr.shape == (4,), 'input wrong shape'
    return IDX_TO_BREED_MAP[input_arr.argmax()]
    
    

Make Matrix of training images scaled to 200 by 200

In [20]:
def get_image_matrix_from_ids(list_of_ids, output_size = (200, 200)):
    img_matrix = np.zeros((len(list_of_ids), output_size[0], output_size[1], 3), dtype='float32') #stores all of the images in a 4 dim tensor  
    for idx, img_id in enumerate(list_of_ids):
        img = read_image_as_rbg(img_id=img_id, shape=output_size)
        img_matrix[idx] = img
    return img_matrix
In [21]:
x_train = get_image_matrix_from_ids(x_train_list_id)
In [22]:
def normalize_images(img_tensor):
    return (img_tensor - 128.0)/ 128.0 #scales all values to -1.0 to 1.0
In [23]:
NUM_SAMPLES = x_train.shape[0]

idx_arr =  np.arange(NUM_SAMPLES)
np.random.shuffle(idx_arr)
print('sample of random array', idx_arr[:25])

train_arr = idx_arr[:int(NUM_SAMPLES * 0.8)]
valid_arr = idx_arr[int(NUM_SAMPLES * 0.8):]

X_train_arr = x_train[train_arr]
X_valid_arr = x_train[valid_arr]

y_train_arr = y_train_labels[train_arr]
y_valid_arr = y_train_labels[valid_arr]

print(X_train_arr.shape, y_train_arr.shape)
print(y_train_arr.sum(axis = 0))

print(y_valid_arr.sum(axis = 0))
sample of random array [305 249 247  74  59 261 373 378 187 392 193 165 352 103 369 405 327 107
   5 151  58   4 234 217 348]
(342, 200, 200, 3) (342, 4)
[ 99.  96.  90.  57.]
[ 27.  21.  21.  17.]
In [24]:
for i in range(20):
    plt.imshow(X_train_arr[i].astype('uint8'))
    plt.show()
    print(decode_arr(y_train_arr[i]))
pomeranian
pomeranian
pomeranian
scottish_deerhound
scottish_deerhound
pomeranian
cocker_spaniel
cocker_spaniel
maltese_dog
cocker_spaniel
maltese_dog
maltese_dog
pomeranian
scottish_deerhound
cocker_spaniel
cocker_spaniel
pomeranian
scottish_deerhound
scottish_deerhound
maltese_dog
In [25]:
decode_arr(y_train_arr[20])
Out[25]:
'scottish_deerhound'
In [26]:
import numpy as np
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.optimizers import SGD
import time
In [27]:
#model.reset_states()
In [28]:
#based on mnist architecture and way too slow to train

model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', input_shape=(200, 200, 3)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2))) #usually this variable is 2, 2
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(Conv2D(256, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(Conv2D(512, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4, activation='softmax'))

sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
model.summary()
#model.fit(x_train, y_train, batch_size=32, epochs=10)
#score = model.evaluate(x_test, y_test, batch_size=32)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 198, 198, 32)      896       
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 196, 196, 32)      9248      
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 98, 98, 32)        0         
_________________________________________________________________
dropout_1 (Dropout)          (None, 98, 98, 32)        0         
_________________________________________________________________
conv2d_3 (Conv2D)            (None, 96, 96, 64)        18496     
_________________________________________________________________
conv2d_4 (Conv2D)            (None, 94, 94, 64)        36928     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 47, 47, 64)        0         
_________________________________________________________________
dropout_2 (Dropout)          (None, 47, 47, 64)        0         
_________________________________________________________________
conv2d_5 (Conv2D)            (None, 45, 45, 128)       73856     
_________________________________________________________________
conv2d_6 (Conv2D)            (None, 43, 43, 128)       147584    
_________________________________________________________________
max_pooling2d_3 (MaxPooling2 (None, 21, 21, 128)       0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 21, 21, 128)       0         
_________________________________________________________________
conv2d_7 (Conv2D)            (None, 19, 19, 256)       295168    
_________________________________________________________________
conv2d_8 (Conv2D)            (None, 17, 17, 256)       590080    
_________________________________________________________________
max_pooling2d_4 (MaxPooling2 (None, 8, 8, 256)         0         
_________________________________________________________________
dropout_4 (Dropout)          (None, 8, 8, 256)         0         
_________________________________________________________________
conv2d_9 (Conv2D)            (None, 6, 6, 512)         1180160   
_________________________________________________________________
conv2d_10 (Conv2D)           (None, 4, 4, 512)         2359808   
_________________________________________________________________
max_pooling2d_5 (MaxPooling2 (None, 2, 2, 512)         0         
_________________________________________________________________
dropout_5 (Dropout)          (None, 2, 2, 512)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 2048)              0         
_________________________________________________________________
dense_1 (Dense)              (None, 512)               1049088   
_________________________________________________________________
dropout_6 (Dropout)          (None, 512)               0         
_________________________________________________________________
dense_2 (Dense)              (None, 4)                 2052      
=================================================================
Total params: 5,763,364
Trainable params: 5,763,364
Non-trainable params: 0
_________________________________________________________________
In [29]:
X_train_arr_norm = normalize_images(X_train_arr)
X_valid_arr_norm = normalize_images(X_valid_arr)
In [30]:
def make_confusion_matrix(pred, actual, num_classes):
    cm = np.zeros((num_classes, num_classes))
    num_results = pred.shape[0]
    for i in range(num_results):
        cm[pred[i], actual[i]] += 1.0
    return cm
In [31]:
start = time.time()
num_round = 20
num_epochs_per_round = 10
print('Total Number of Epochs')
for i in range(num_round):
    print('Training Step: ', i)
    model.fit(X_train_arr_norm, y_train_arr, batch_size=32, epochs=num_epochs_per_round)
    pred_prob = model.predict_proba(X_valid_arr_norm)
    
    acc = (pred_prob.argmax(axis = 1) == y_valid_arr.argmax(axis = 1)).astype('float32').mean()
    loss = model.evaluate(X_valid_arr, y_valid_arr, batch_size=32)
    #print(pred_prob.round(3))
    print('Summary Stats on Predictions:')
    print(pred_prob.min(axis = 0))
    print(pred_prob.max(axis = 0))
    print(pred_prob.mean(axis = 0))
    print(pred_prob.std(axis = 0))
    print(make_confusion_matrix(pred_prob.argmax(axis = 1),  y_valid_arr.argmax(axis = 1), 4))
    print('Prediction Validation Accuracy and Loss', acc, loss)
    
print('Total Elapsed Time', time.time() - start)
Total Number of Epochs
Training Step:  0
Epoch 1/10
342/342 [==============================] - 21s 61ms/step - loss: 1.3859
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3850
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3803
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3756
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3712
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3769
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3677
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3652
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3694
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3678
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 8ms/step
Summary Stats on Predictions:
[ 0.26497039  0.26130179  0.25325996  0.20459194]
[ 0.27686045  0.26800776  0.25770822  0.21354012]
[ 0.27139843  0.26512891  0.2550945   0.20837806]
[ 0.00259566  0.00162287  0.00101749  0.00168129]
[[ 27.  18.  18.  17.]
 [  0.   3.   3.   0.]
 [  0.   0.   0.   0.]
 [  0.   0.   0.   0.]]
Prediction Validation Accuracy and Loss 0.348837 2.75668780194
Training Step:  1
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3640
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3603
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3666
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3562
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3567
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3536
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3519
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3508
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3413
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3422
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.25431556  0.23930433  0.24561742  0.18659408]
[ 0.29600695  0.29384607  0.26954627  0.20917271]
[ 0.27485928  0.27075821  0.25680852  0.19757399]
[ 0.00958787  0.01331462  0.00552346  0.00548639]
[[ 21.   4.   5.  14.]
 [  6.  17.  16.   3.]
 [  0.   0.   0.   0.]
 [  0.   0.   0.   0.]]
Prediction Validation Accuracy and Loss 0.44186 3.8262479028
Training Step:  2
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3331
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3369
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3273
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3317
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3226
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3109
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 1.3152
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2976
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2963
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2872
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.20012935  0.15878768  0.20572372  0.14011514]
[ 0.36407986  0.38356245  0.32727119  0.25924113]
[ 0.27440557  0.2744301   0.25806296  0.19310133]
[ 0.03892497  0.05859273  0.02558773  0.03077034]
[[ 20.   5.   4.  10.]
 [  5.  13.  16.   4.]
 [  2.   3.   1.   3.]
 [  0.   0.   0.   0.]]
Prediction Validation Accuracy and Loss 0.395349 4.6183258101
Training Step:  3
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2948
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2871
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2692
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2589
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2435
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2626
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2442
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2497
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2439
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2344
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.14990143  0.08463985  0.16747962  0.09367149]
[ 0.4533911   0.49127114  0.37418711  0.28008834]
[ 0.29020464  0.27923518  0.25182521  0.17873494]
[ 0.08028386  0.11230586  0.04397558  0.04985944]
[[ 21.   5.   4.  12.]
 [  4.  13.  16.   4.]
 [  2.   3.   1.   1.]
 [  0.   0.   0.   0.]]
Prediction Validation Accuracy and Loss 0.406977 6.02723190396
Training Step:  4
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2425
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2516
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2220
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2092
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2128
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2163
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2030
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1934
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1993
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1847
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.10686314  0.07143812  0.17346129  0.06706087]
[ 0.43485653  0.59867221  0.39443672  0.29268169]
[ 0.25885144  0.31296128  0.26383874  0.16434863]
[ 0.09234731  0.14731371  0.05034706  0.06232392]
[[ 19.   1.   4.   9.]
 [  4.  18.  16.   5.]
 [  4.   2.   1.   3.]
 [  0.   0.   0.   0.]]
Prediction Validation Accuracy and Loss 0.44186 6.56983994329
Training Step:  5
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1924
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 1.2068
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1786
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1700
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1570
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1857
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1782
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1515
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1643
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1436
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.08667286  0.06763534  0.13899814  0.05475324]
[ 0.47290146  0.68590677  0.42733613  0.28836784]
[ 0.24719061  0.34039214  0.25762284  0.15479441]
[ 0.11961114  0.17986841  0.06271999  0.07028234]
[[ 19.   1.   0.  10.]
 [  5.  19.  20.   4.]
 [  3.   1.   1.   3.]
 [  0.   0.   0.   0.]]
Prediction Validation Accuracy and Loss 0.453488 8.57062719035
Training Step:  6
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1337
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1234
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 1.0947
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 1.1039
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 1.0868
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 1.0714
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 1.0426
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 1.0241
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 1.0087
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 1.0112
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.04348349  0.05722236  0.07477614  0.03308731]
[ 0.65489495  0.79584235  0.64062941  0.28761911]
[ 0.24881241  0.34185791  0.26387024  0.14545956]
[ 0.18946201  0.22221836  0.15025102  0.0745915 ]
[[ 21.   1.   2.   9.]
 [  2.  18.   8.   1.]
 [  4.   2.  11.   7.]
 [  0.   0.   0.   0.]]
Prediction Validation Accuracy and Loss 0.581395 10.9815884967
Training Step:  7
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9887
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9686
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9434
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9655
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9610
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9522
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9227
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9943
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9024
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9407
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.01805105  0.03509674  0.03788279  0.01929466]
[ 0.7846666   0.87408304  0.73649633  0.3677679 ]
[ 0.21660119  0.33791468  0.30463782  0.14084631]
[ 0.22633632  0.25180089  0.20710142  0.08998235]
[[ 16.   1.   2.   5.]
 [  3.  18.   6.   1.]
 [  7.   2.  13.  10.]
 [  1.   0.   0.   1.]]
Prediction Validation Accuracy and Loss 0.55814 10.4197664261
Training Step:  8
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9093
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9441
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9215
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8918
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9050
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8905
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8855
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8730
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8830
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8583
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.0106497   0.00505319  0.01331576  0.00916449]
[ 0.91925204  0.94584155  0.77497345  0.45673069]
[ 0.3786138   0.23200999  0.24100567  0.14837059]
[ 0.31436035  0.2761105   0.22883509  0.09715267]
[[ 24.   5.   2.  10.]
 [  1.  10.   5.   1.]
 [  2.   6.  14.   5.]
 [  0.   0.   0.   1.]]
Prediction Validation Accuracy and Loss 0.569767 10.4464830354
Training Step:  9
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.9001
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8832
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8622
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8401
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8616
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8481
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8562
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8487
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8306
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8414
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.00435637  0.01661341  0.02457951  0.0064147 ]
[ 0.85289657  0.96464944  0.73431796  0.4021841 ]
[ 0.23376562  0.35133186  0.27802822  0.13687426]
[ 0.25767046  0.29822612  0.21614346  0.10803001]
[[ 19.   1.   2.   5.]
 [  2.  18.   7.   1.]
 [  5.   2.  12.   6.]
 [  1.   0.   0.   5.]]
Prediction Validation Accuracy and Loss 0.627907 9.68782016843
Training Step:  10
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8492
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8056
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7805
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8013
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8205
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8220
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8081
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8087
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7833
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7856
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.00349158  0.00359512  0.00524657  0.00424113]
[ 0.96110612  0.9816367   0.74031979  0.49292189]
[ 0.39529526  0.30291462  0.18272015  0.11906998]
[ 0.35346732  0.32516313  0.2116718   0.10584262]
[[ 23.   1.   2.  11.]
 [  2.  18.   7.   1.]
 [  0.   2.  11.   2.]
 [  2.   0.   1.   3.]]
Prediction Validation Accuracy and Loss 0.639535 9.94888611727
Training Step:  11
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7890
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7715
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7812
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7354
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7600
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.8026
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7641
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7644
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7650
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7276
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.00094824  0.00328993  0.00525453  0.00188044]
[ 0.96249294  0.99144202  0.81201881  0.50826949]
[ 0.3308835   0.33981553  0.21775377  0.11154722]
[ 0.35123926  0.35112381  0.24926402  0.11093712]
[[ 22.   1.   2.   8.]
 [  2.  18.   7.   1.]
 [  3.   2.  12.   6.]
 [  0.   0.   0.   2.]]
Prediction Validation Accuracy and Loss 0.627907 8.77415399773
Training Step:  12
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7621
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7095
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7511
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7371
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7390
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7339
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7043
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7176
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6946
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6687
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.00106241  0.00255402  0.00447446  0.00114023]
[ 0.98001111  0.99236906  0.85033453  0.51886469]
[ 0.32003641  0.32529351  0.26031151  0.09435863]
[ 0.35827282  0.33444783  0.28042552  0.10671135]
[[ 21.   1.   2.   7.]
 [  2.  18.   6.   1.]
 [  4.   2.  13.   6.]
 [  0.   0.   0.   3.]]
Prediction Validation Accuracy and Loss 0.639535 8.38281098077
Training Step:  13
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6992
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.7066
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6955
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6617
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6773
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6435
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6635
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6627
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6313
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6387
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[ 0.00015993  0.00502233  0.00152014  0.00059009]
[ 0.96738786  0.99772984  0.86604607  0.61864436]
[ 0.26115316  0.36212534  0.26013759  0.11658389]
[ 0.32454947  0.35505942  0.28668353  0.13782729]
[[ 19.   1.   2.   6.]
 [  2.  18.   6.   2.]
 [  4.   2.  13.   5.]
 [  2.   0.   0.   4.]]
Prediction Validation Accuracy and Loss 0.627907 8.24295480861
Training Step:  14
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6448
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6108
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5958
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5997
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6370
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6061
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5744
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5936
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6137
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5461
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[  2.14359734e-05   3.58051475e-04   2.98958999e-04   5.67971802e-05]
[ 0.99605787  0.99947912  0.9031074   0.73230326]
[ 0.34134984  0.30382442  0.22616246  0.12866326]
[ 0.38255951  0.36082864  0.29240379  0.1647999 ]
[[ 21.   2.   2.   7.]
 [  2.  17.   7.   1.]
 [  2.   2.  12.   5.]
 [  2.   0.   0.   4.]]
Prediction Validation Accuracy and Loss 0.627907 8.91701081742
Training Step:  15
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.6032
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5774
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5516
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5267
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5763
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4848
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5201
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5212
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5296
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5348
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[  7.64292054e-06   1.99845002e-04   1.20975754e-04   1.74648703e-05]
[ 0.99083871  0.99981874  0.95235378  0.83154106]
[ 0.29609823  0.29124588  0.27884549  0.13381045]
[ 0.36182365  0.34958047  0.31720012  0.18372065]
[[ 18.   2.   2.   6.]
 [  2.  15.   5.   1.]
 [  3.   4.  14.   6.]
 [  4.   0.   0.   4.]]
Prediction Validation Accuracy and Loss 0.593023 8.34310072522
Training Step:  16
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5412
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5537
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4848
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.5387
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4615
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4814
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4477
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4254
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4269
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4725
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[  9.40042082e-06   1.18122101e-04   1.20146040e-04   1.33487001e-05]
[ 0.99631917  0.99978787  0.93539411  0.91047037]
[ 0.34183696  0.28782827  0.22822711  0.14210781]
[ 0.38821343  0.34898233  0.29970461  0.20310394]
[[ 21.   2.   2.   6.]
 [  2.  18.   5.   1.]
 [  1.   1.  12.   5.]
 [  3.   0.   2.   5.]]
Prediction Validation Accuracy and Loss 0.651163 9.72382957991
Training Step:  17
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4401
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4472
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.4201
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3396
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3312
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3807
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3554
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3701
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3858
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3708
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[  3.89516487e-07   2.03179097e-05   1.43748448e-05   8.18632884e-07]
[ 0.99922907  0.99997222  0.98417747  0.94220495]
[ 0.33030152  0.34622374  0.21985617  0.10361859]
[ 0.41043627  0.40375987  0.33974513  0.19762887]
[[ 21.   1.   2.   6.]
 [  2.  19.   7.   2.]
 [  1.   1.  11.   6.]
 [  3.   0.   1.   3.]]
Prediction Validation Accuracy and Loss 0.627907 8.45775879261
Training Step:  18
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3529
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2933
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3068
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3319
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2673
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2727
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2484
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2515
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2720
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3060
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[  1.38587808e-08   1.64192716e-06   9.81597736e-07   3.68272772e-08]
[ 0.99846125  0.99999833  0.99518698  0.99233001]
[ 0.27576944  0.34647632  0.21956715  0.15818715]
[ 0.37989536  0.40585241  0.33144945  0.24447714]
[[ 18.   1.   2.   6.]
 [  2.  18.   8.   1.]
 [  1.   2.  11.   5.]
 [  6.   0.   0.   5.]]
Prediction Validation Accuracy and Loss 0.604651 8.85708950841
Training Step:  19
Epoch 1/10
342/342 [==============================] - 9s 25ms/step - loss: 0.3126
Epoch 2/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2616
Epoch 3/10
342/342 [==============================] - 9s 25ms/step - loss: 0.1985
Epoch 4/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2113
Epoch 5/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2177
Epoch 6/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2183
Epoch 7/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2120
Epoch 8/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2212
Epoch 9/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2555
Epoch 10/10
342/342 [==============================] - 9s 25ms/step - loss: 0.2053
86/86 [==============================] - 1s 7ms/step
86/86 [==============================] - 1s 7ms/step
Summary Stats on Predictions:
[  1.72989412e-09   3.56502824e-07   2.39531801e-07   1.20741817e-08]
[ 0.99995744  0.99999964  0.99503666  0.99475741]
[ 0.32477328  0.35003853  0.20279188  0.12239634]
[ 0.41209757  0.41973957  0.34179273  0.22641076]
[[ 19.   1.   3.   4.]
 [  2.  19.   7.   2.]
 [  1.   1.  11.   5.]
 [  5.   0.   0.   6.]]
Prediction Validation Accuracy and Loss 0.639535 8.4850242526
Total Elapsed Time 1753.997266292572
In [36]:
model.save('dog_breed_demo-arch-1-epoch-200.h5')
In [40]:
import json
json.dump(model.to_json(), open('arch-1.json', 'w'))